From d52ba01f4a82538d06c916514fa0f4848397d699 Mon Sep 17 00:00:00 2001 From: Eddie Dong Date: Thu, 9 Jun 2011 16:24:09 +0800 Subject: [PATCH] Nested VMX: Emulation of guest VMCLEAR Signed-off-by: Qing He Signed-off-by: Eddie Dong Signed-off-by: Tim Deegan Committed-by: Tim Deegan --- xen/arch/x86/hvm/vmx/vmx.c | 6 ++- xen/arch/x86/hvm/vmx/vvmx.c | 70 ++++++++++++++++++++++++++++++ xen/include/asm-x86/hvm/vmx/vmcs.h | 2 + xen/include/asm-x86/hvm/vmx/vvmx.h | 1 + 4 files changed, 78 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 574a6a2f2e..93f46a7ce8 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -2439,6 +2439,11 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs) update_guest_eip(); break; + case EXIT_REASON_VMCLEAR: + if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY ) + update_guest_eip(); + break; + case EXIT_REASON_VMPTRLD: if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY ) update_guest_eip(); @@ -2451,7 +2456,6 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs) case EXIT_REASON_MWAIT_INSTRUCTION: case EXIT_REASON_MONITOR_INSTRUCTION: - case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index 12004cb849..1a4a2519a1 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -26,6 +26,8 @@ #include #include +static void nvmx_purge_vvmcs(struct vcpu *v); + int nvmx_vcpu_initialise(struct vcpu *v) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); @@ -53,6 +55,7 @@ void nvmx_vcpu_destroy(struct vcpu *v) { struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); + nvmx_purge_vvmcs(v); if ( nvcpu->nv_n2vmcx ) { __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx)); free_xenheap_page(nvcpu->nv_n2vmcx); @@ -352,6 +355,14 @@ static void vmreturn(struct cpu_user_regs *regs, enum vmx_ops_result ops_res) regs->eflags = eflags; } +static void 
__clear_current_vvmcs(struct vcpu *v) +{ + struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); + + if ( nvcpu->nv_n2vmcx ) + __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx)); +} + static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); @@ -371,6 +382,25 @@ static inline void map_io_bitmap_all(struct vcpu *v) __map_io_bitmap (v, IO_BITMAP_B); } +static void nvmx_purge_vvmcs(struct vcpu *v) +{ + struct nestedvmx *nvmx = &vcpu_2_nvmx(v); + struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); + int i; + + __clear_current_vvmcs(v); + if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR ) + hvm_unmap_guest_frame (nvcpu->nv_vvmcx); + nvcpu->nv_vvmcx = NULL; + nvcpu->nv_vvmcxaddr = VMCX_EADDR; + for (i=0; i<2; i++) { + if ( nvmx->iobitmap[i] ) { + hvm_unmap_guest_frame (nvmx->iobitmap[i]); + nvmx->iobitmap[i] = NULL; + } + } +} + /* * VMX instructions handling */ @@ -419,6 +449,7 @@ int nvmx_handle_vmxoff(struct cpu_user_regs *regs) if ( rc != X86EMUL_OKAY ) return rc; + nvmx_purge_vvmcs(v); nvmx->vmxon_region_pa = 0; vmreturn(regs, VMSUCCEED); @@ -443,6 +474,9 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs) goto out; } + if ( nvcpu->nv_vvmcxaddr != gpa ) + nvmx_purge_vvmcs(v); + if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR ) { nvcpu->nv_vvmcx = hvm_map_guest_frame_rw (gpa >> PAGE_SHIFT); @@ -478,3 +512,39 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs) return X86EMUL_OKAY; } +int nvmx_handle_vmclear(struct cpu_user_regs *regs) +{ + struct vcpu *v = current; + struct vmx_inst_decoded decode; + struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); + unsigned long gpa = 0; + int rc; + + rc = decode_vmx_inst(regs, &decode, &gpa, 0); + if ( rc != X86EMUL_OKAY ) + return rc; + + if ( gpa & 0xfff ) + { + vmreturn(regs, VMFAIL_INVALID); + goto out; + } + + if ( gpa != nvcpu->nv_vvmcxaddr && nvcpu->nv_vvmcxaddr != VMCX_EADDR ) + { + gdprintk(XENLOG_WARNING, + "vmclear gpa %lx not the same as current vmcs %"PRIpaddr"\n", + gpa, nvcpu->nv_vvmcxaddr); + 
vmreturn(regs, VMSUCCEED); + goto out; + } + if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR ) + __set_vvmcs(nvcpu->nv_vvmcx, NVMX_LAUNCH_STATE, 0); + nvmx_purge_vvmcs(v); + + vmreturn(regs, VMSUCCEED); + +out: + return X86EMUL_OKAY; +} + diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index eb3a149dbb..1c055ffb0d 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -381,6 +381,8 @@ enum vmcs_field { HOST_SYSENTER_EIP = 0x00006c12, HOST_RSP = 0x00006c14, HOST_RIP = 0x00006c16, + /* A virtual VMCS field used for nestedvmx only */ + NVMX_LAUNCH_STATE = 0x00006c20, }; #define VMCS_VPID_WIDTH 16 diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h index fbe34ab034..3533a4e546 100644 --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h @@ -155,6 +155,7 @@ void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val); void nvmx_destroy_vmcs(struct vcpu *v); int nvmx_handle_vmptrld(struct cpu_user_regs *regs); int nvmx_handle_vmptrst(struct cpu_user_regs *regs); +int nvmx_handle_vmclear(struct cpu_user_regs *regs); #endif /* __ASM_X86_HVM_VVMX_H__ */ -- 2.30.2